From 8e73be0de815ce9a70e222c9b28d91925d40c21f Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Mon, 9 Feb 2009 14:23:51 +0000
Subject: [PATCH] vtd: adding support for multiple queued invalidation pages

Signed-off-by: Allen Kay
---
 xen/drivers/passthrough/vtd/ia64/vtd.c | 7 ++++---
 xen/drivers/passthrough/vtd/intremap.c | 2 +-
 xen/drivers/passthrough/vtd/iommu.c    | 12 ++++++------
 xen/drivers/passthrough/vtd/iommu.h    | 4 +++-
 xen/drivers/passthrough/vtd/qinval.c   | 4 +++-
 xen/drivers/passthrough/vtd/vtd.h      | 4 ++--
 xen/drivers/passthrough/vtd/x86/vtd.c  | 9 +++++----
 7 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/xen/drivers/passthrough/vtd/ia64/vtd.c b/xen/drivers/passthrough/vtd/ia64/vtd.c
index eac01a81d2..142f9e1c2c 100644
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c
@@ -45,16 +45,17 @@ void unmap_vtd_domain_page(void *va)
 }
 
 /* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
 {
     struct page_info *pg;
     u64 *vaddr;
 
-    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+                             d ? MEMF_node(domain_to_node(d)) : 0);
     vaddr = map_domain_page(page_to_mfn(pg));
     if ( !vaddr )
         return 0;
-    memset(vaddr, 0, PAGE_SIZE);
+    memset(vaddr, 0, PAGE_SIZE * npages);
 
     iommu_flush_cache_page(vaddr);
     unmap_domain_page(vaddr);
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index c9a73f50c4..063031799d 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -502,7 +502,7 @@ int intremap_setup(struct iommu *iommu)
     ir_ctrl = iommu_ir_ctrl(iommu);
     if ( ir_ctrl->iremap_maddr == 0 )
     {
-        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL);
+        ir_ctrl->iremap_maddr = alloc_pgtable_maddr(NULL, 1);
         if ( ir_ctrl->iremap_maddr == 0 )
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index cec1e9db43..7237acbc49 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -129,9 +129,9 @@ void iommu_flush_cache_entry(void *addr)
     __iommu_flush_cache(addr, 8);
 }
 
-void iommu_flush_cache_page(void *addr)
+void iommu_flush_cache_page(void *addr, unsigned long npages)
 {
-    __iommu_flush_cache(addr, PAGE_SIZE_4K);
+    __iommu_flush_cache(addr, PAGE_SIZE_4K * npages);
 }
 
 int nr_iommus;
@@ -146,7 +146,7 @@ static u64 bus_to_context_maddr(struct iommu *iommu, u8 bus)
     root = &root_entries[bus];
     if ( !root_present(*root) )
     {
-        maddr = alloc_pgtable_maddr(NULL);
+        maddr = alloc_pgtable_maddr(NULL, 1);
         if ( maddr == 0 )
         {
             unmap_vtd_domain_page(root_entries);
@@ -174,7 +174,7 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
     addr &= (((u64)1) << addr_width) - 1;
     ASSERT(spin_is_locked(&hd->mapping_lock));
     if ( hd->pgd_maddr == 0 )
-        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain)) == 0) )
+        if ( !alloc || ((hd->pgd_maddr = alloc_pgtable_maddr(domain, 1)) == 0) )
             goto out;
 
     parent = (struct dma_pte *)map_vtd_domain_page(hd->pgd_maddr);
@@ -187,7 +187,7 @@ static u64 addr_to_dma_page_maddr(struct domain *domain, u64 addr, int alloc)
         {
             if ( !alloc )
                 break;
-            maddr = alloc_pgtable_maddr(domain);
+            maddr = alloc_pgtable_maddr(domain, 1);
             if ( !maddr )
                 break;
             dma_set_pte_addr(*pte, maddr);
@@ -577,7 +577,7 @@ static int iommu_set_root_entry(struct iommu *iommu)
     spin_lock(&iommu->lock);
 
     if ( iommu->root_maddr == 0 )
-        iommu->root_maddr = alloc_pgtable_maddr(NULL);
+        iommu->root_maddr = alloc_pgtable_maddr(NULL, 1);
     if ( iommu->root_maddr == 0 )
     {
         spin_unlock(&iommu->lock);
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index dbe0f3c1ad..46eb4cdb40 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -397,7 +397,9 @@ struct poll_info {
     u32 udata;
 };
 
-#define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
+#define MAX_QINVAL_PAGES 8
+#define NUM_QINVAL_PAGES 1
+#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
 
 #define qinval_present(v) ((v).lo & 1)
 #define qinval_fault_disable(v) (((v).lo >> 1) & 1)
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 47707de44c..1203b0c271 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -427,7 +427,7 @@ int qinval_setup(struct iommu *iommu)
 
     if ( qi_ctrl->qinval_maddr == 0 )
    {
-        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL);
+        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
         if ( qi_ctrl->qinval_maddr == 0 )
         {
             dprintk(XENLOG_WARNING VTDPREFIX,
@@ -445,6 +445,8 @@ int qinval_setup(struct iommu *iommu)
      * registers are automatically reset to 0 with write
      * to IQA register.
      */
+    if ( NUM_QINVAL_PAGES <= MAX_QINVAL_PAGES )
+        qi_ctrl->qinval_maddr |= NUM_QINVAL_PAGES - 1;
     dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
 
     /* enable queued invalidation hardware */
diff --git a/xen/drivers/passthrough/vtd/vtd.h b/xen/drivers/passthrough/vtd/vtd.h
index ec02d129d8..d119117068 100644
--- a/xen/drivers/passthrough/vtd/vtd.h
+++ b/xen/drivers/passthrough/vtd/vtd.h
@@ -101,12 +101,12 @@ unsigned int get_cache_line_size(void);
 void cacheline_flush(char *);
 void flush_all_cache(void);
 void *map_to_nocache_virt(int nr_iommus, u64 maddr);
-u64 alloc_pgtable_maddr(struct domain *d);
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages);
 void free_pgtable_maddr(u64 maddr);
 void *map_vtd_domain_page(u64 maddr);
 void unmap_vtd_domain_page(void *va);
 
 void iommu_flush_cache_entry(void *addr);
-void iommu_flush_cache_page(void *addr);
+void iommu_flush_cache_page(void *addr, unsigned long npages);
 
 #endif // _VTD_H_
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 31dc561881..18ff1187fd 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -38,20 +38,21 @@ void unmap_vtd_domain_page(void *va)
 }
 
 /* Allocate page table, return its machine address */
-u64 alloc_pgtable_maddr(struct domain *d)
+u64 alloc_pgtable_maddr(struct domain *d, unsigned long npages)
 {
     struct page_info *pg;
     u64 *vaddr;
     unsigned long mfn;
 
-    pg = alloc_domheap_page(NULL, d ? MEMF_node(domain_to_node(d)) : 0);
+    pg = alloc_domheap_pages(NULL, get_order_from_pages(npages),
+                             d ? MEMF_node(domain_to_node(d)) : 0);
     if ( !pg )
         return 0;
     mfn = page_to_mfn(pg);
     vaddr = map_domain_page(mfn);
-    memset(vaddr, 0, PAGE_SIZE);
+    memset(vaddr, 0, PAGE_SIZE * npages);
 
-    iommu_flush_cache_page(vaddr);
+    iommu_flush_cache_page(vaddr, npages);
     unmap_domain_page(vaddr);
 
     return (u64)mfn << PAGE_SHIFT_4K;
-- 
2.30.2